# PC World 2007 September / PCWorld_2007-09_cd.bin : system/ntfs/ntfsundelete.exe : {app}/pyue/ntfs.pyc
# Python compiled bytecode, 2007-07-11, 27 KB, 690 lines
# Source Generated with Decompyle++
# File: in.pyc (Python 2.4)
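# Decompiled NTFS parsing layer of the "pyue" undelete tool shipped inside
# ntfsundelete.exe; it builds on the native fstools extension (CLinearSpace,
# MFT record/attribute parsers, disk clusterizers) imported wholesale below.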
import traceback
import threading
import struct
from fstools import *
class CNTFSError(Exception):
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, msg, *args, **kwargs)
class CNTFSBootSectorError(CNTFSError):
pass
class CNTFSMFTReferenceOverflowError(CNTFSError):
pass
class CNTFSRelatedError(CNTFSError):
def __init__(self, msg, related_exception):
CNTFSError.__init__(self, msg + ' (Reason: %s) ' % str(related_exception))
self.related_exception = related_exception
def extent_map_from_ntfs_extents(e):
'''
convert fstools ntfs extent list
to space.ExtentMap object
'''
try:
R = CExtentList()
for i in xrange(len(e)):
item = e.get_extent(i)
R.add_mapping(item.vcn, item.length, item.lcn)
return R
    except Exception, Value:
        raise CNTFSRelatedError('Cannot convert NTFS extent map', Value)
def load_attr_list(linear_space):
'''
read ATTRIBUTE_LIST object from
linear space
'''
if linear_space.size < 256 * 1024:
raw_data = linear_space.read_data(0, linear_space.size)
        try:
            return new_ntfs_attrlist_object(raw_data)
        except:
            raise CNTFSError('Invalid AT_ATTRIBUTE_LIST attribute')
    else:
        raise CNTFSError('Invalid attribute list size')
class CNTFSDataStream(CLinearSpace):
'''
Named linear space class
'''
def __init__(self, name, linear_space):
'''
__init__(self, name, linear_space)
'''
self._CNTFSDataStream__name = name
self._CNTFSDataStream__linear_space = linear_space
def get_size(self):
return self._CNTFSDataStream__linear_space.get_size()
def _read_data(self, start_pos, count, buffer_size):
return self._CNTFSDataStream__linear_space._read_data(start_pos, count, buffer_size)
name = property((lambda self: self._CNTFSDataStream__name))
class CNTFSIndex:
'''
NTFS index container.
Attributes:
entries - a list of entries in index
type - type of indexed entries
collation_rule - collation rule
index_block_size - size of NTFS index block
'''
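    # NOTE: CNTFSFile.read_indexes() fills `entries` with tuples of the form
    # (index_entry, filename_object_or_None); the second element is decoded
    # only for AT_FILE_NAME indexes such as the directory index $I30.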
def __init__(self, index_block_size = None, collation_rule = None, type = None):
'''__init__(index_block_size=None, collation_rule=None, type=None)'''
self._CNTFSIndex__entries = []
self._CNTFSIndex__type = type
self._CNTFSIndex__collation_rule = collation_rule
self._CNTFSIndex__index_block_size = index_block_size
self._CNTFSIndex__index_blocks = []
def __len__(self):
return len(self._CNTFSIndex__entries)
def __getitem__(self, index):
return self._CNTFSIndex__entries[index]
entries = property((lambda self: self._CNTFSIndex__entries))
type = property((lambda self: self._CNTFSIndex__type))
collation_rule = property((lambda self: self._CNTFSIndex__collation_rule))
index_block_size = property((lambda self: self._CNTFSIndex__index_block_size))
index_blocks = property((lambda self: self._CNTFSIndex__index_blocks))
class CNTFSFile:
def __init__(self, volume, mft_ref):
self._CNTFSFile__volume = volume
self._CNTFSFile__mft_ref = mft_ref
def open(self):
try:
volume = self._CNTFSFile__volume
self._CNTFSFile__data_streams = { }
self._CNTFSFile__file_names = { }
self._CNTFSFile__win32_file_names = { }
self._CNTFSFile__ntfs_attributes = []
self._CNTFSFile__mft_records = []
self._CNTFSFile__mft_refs = [
self._CNTFSFile__mft_ref]
self._CNTFSFile__ntfs_attributes_by_type = { }
self._CNTFSFile__mft_record = mft_record = volume.MFT.load_mft_record(self._CNTFSFile__mft_ref)
self._CNTFSFile__is_directory = mft_record.get_flags() & MFT_RECORD_IS_DIRECTORY
self._CNTFSFile__is_deleted = mft_record.get_flags() & MFT_RECORD_IN_USE == 0
self._CNTFSFile__index_allocations = []
self._CNTFSFile__index_roots = []
self._CNTFSFile__std_infos = []
self._CNTFSFile__default_data_stream_size = 0x0L
self._init_with_mft_entry(mft_record)
        except CNTFSError, Value:
            raise CNTFSRelatedError('Cannot open file %s' % hex(self._CNTFSFile__mft_ref), Value)
def open_if_base(self):
try:
volume = self._CNTFSFile__volume
self._CNTFSFile__data_streams = { }
self._CNTFSFile__file_names = { }
self._CNTFSFile__win32_file_names = { }
self._CNTFSFile__ntfs_attributes = []
self._CNTFSFile__mft_records = []
self._CNTFSFile__mft_refs = [
self._CNTFSFile__mft_ref]
self._CNTFSFile__ntfs_attributes_by_type = { }
self._CNTFSFile__mft_record = mft_record = volume.MFT.load_mft_record(self._CNTFSFile__mft_ref)
if not self.is_base_entry:
raise CNTFSError('Cannot open file on non-base entry %s' % hex(self._CNTFSFile__mft_ref))
self._CNTFSFile__is_directory = mft_record.get_flags() & MFT_RECORD_IS_DIRECTORY
self._CNTFSFile__is_deleted = mft_record.get_flags() & MFT_RECORD_IN_USE == 0
self._CNTFSFile__index_allocations = []
self._CNTFSFile__index_roots = []
self._CNTFSFile__std_infos = []
self._CNTFSFile__default_data_stream_size = 0x0L
self._init_with_mft_entry(mft_record)
        except CNTFSError, Value:
            raise CNTFSRelatedError('Cannot open file %s' % hex(self._CNTFSFile__mft_ref), Value)
def has_file_name(self, file_name):
return self._CNTFSFile__file_names.has_key(file_name)
def read_indexes(self):
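        '''
        read_indexes(self)
        builds a dict mapping index name (e.g. u'$I30') to a CNTFSIndex,
        combining entries from the INDEX_ROOT and INDEX_ALLOCATION
        attributes of this file.
        '''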
volume = self._CNTFSFile__volume
indexes = { }
for index_root in self._CNTFSFile__index_roots:
try:
index_root_object = new_ntfs_indexroot_object(index_root.get_content())
name = index_root.get_name()
index_block_size = index_root_object.get_index_block_size()
collation_rule = index_root_object.get_collation_rule()
type = index_root_object.get_type()
index = CNTFSIndex(index_block_size, collation_rule, type)
trans = lambda x: (x, None)
indexes[name] = index
if type == AT_FILE_NAME:
trans = lambda x: (x, new_ntfs_filename_object(x.get_key()))
                for i in xrange(index_root_object.get_count()):
                    index.entries.append(trans(index_root_object.get_entry(i)))
            except:
                raise CNTFSError('Invalid INDEX_ROOT attribute')
        for attr in self._CNTFSFile__index_allocations:
            # the attribute name ties this INDEX_ALLOCATION to its INDEX_ROOT
            name = attr.get_name()
            index = None
index_block_size = self.volume.index_record_size
trans = lambda x: (x, None)
if indexes.has_key(name):
index = indexes[name]
index_block_size = index.index_block_size
if index.type != None and index.type == AT_FILE_NAME:
trans = lambda x: (x, new_ntfs_filename_object(x.get_key()))
else:
index = CNTFSIndex(None, index_block_size)
indexes[name] = index
pos = 0
ls = attr.linear_space
while pos < ls.size:
raw_data = ls.read_data(pos, index_block_size)
try:
index_allocation = new_ntfs_indexblock_object(raw_data, volume.disk.sector_size)
index.index_blocks.append(index_allocation)
                except Exception:
                    raise CNTFSError('Invalid INDEX_BLOCK')
try:
for i in xrange(index_allocation.get_count()):
index.entries.append(trans(index_allocation.get_entry(i)))
except:
raise CNTFSError('Invalid index entry')
pos += index_block_size
return indexes
def _init_with_mft_entry(self, mft_record):
volume = self._CNTFSFile__volume
abt = self._CNTFSFile__ntfs_attributes_by_type
self._CNTFSFile__mft_records.append(mft_record)
for i in xrange(mft_record.get_rattr_count()):
attr = mft_record.get_rattribute(i)
self._CNTFSFile__ntfs_attributes.append(attr)
tp = attr.get_type()
if abt.has_key(tp):
abt[tp].append(attr)
else:
abt[tp] = [
attr]
if attr.get_type() == AT_FILE_NAME:
try:
content = attr.get_content()
file_name_object = new_ntfs_filename_object(content)
fname = file_name_object.get_file_name()
self._CNTFSFile__file_names[fname] = file_name_object
if file_name_object.get_file_name_type() & FILE_NAME_WIN32:
self._CNTFSFile__win32_file_names[fname] = file_name_object
                except Exception, Value:
                    raise CNTFSRelatedError('Cannot obtain AT_FILE_NAME attribute', Value)
if attr.get_type() == AT_DATA:
content = attr.get_content()
name = attr.get_name()
ls = CreateCRefBufferSpace(content)
if name == '':
self._CNTFSFile__default_data_stream_size = ls.get_size()
self._CNTFSFile__data_streams[name] = CNTFSDataStream(name, ls)
continue
            # Decompyle++ artifact: the statement originally here (apparently a
            # check on a resident AT_ATTRIBUTE_LIST attribute) was not recovered.
for i in xrange(mft_record.get_nattr_count()):
attr = mft_record.get_nattribute(i)
self._CNTFSFile__ntfs_attributes.append(attr)
tp = attr.get_type()
if abt.has_key(tp):
abt[tp].append(attr)
else:
abt[tp] = [
attr]
extents = extent_map_from_ntfs_extents(attr.get_extents())
cs = None
if attr.get_flags() & ATTR_IS_COMPRESSED and attr.get_compression_unit():
cs = CreateCNTFSCompressedClusterSpace(volume.cluster_space, attr.get_compression_unit())
else:
cs = CreateCMappedClusterSpace(volume.cluster_space)
cs.get_extents().extend(extents)
ls = CreateCClusterLinearizer(cs, attr.get_data_size(), 0)
if attr.get_type() == AT_DATA:
name = attr.get_name()
if self._CNTFSFile__data_streams.has_key(name):
self._CNTFSFile__data_streams[name].extents.extend(extents)
else:
self._CNTFSFile__data_streams[name] = ds = CNTFSDataStream(name, ls)
ds.extents = cs.get_extents()
if name == '':
self._CNTFSFile__default_data_stream_size = self._CNTFSFile__data_streams[''].size
if attr.get_type() == AT_ATTRIBUTE_LIST:
attr_list = load_attr_list(ls)
for attr_e in attr_list:
mft_ref = attr_e.get_mft_reference() & 0xFFFFFFFFFFFFL
self._CNTFSFile__mft_refs.append(mft_ref)
if mft_ref != self._CNTFSFile__mft_ref:
rec = volume.MFT.load_mft_record(mft_ref)
self._init_with_mft_entry(rec)
continue
def get_standard_info(self):
si = self.std_infos
if len(si) > 0:
si = si[0]
return (si.get_creation_time(), si.get_last_data_change_time(), si.get_last_access_time(), si.get_last_mft_change_time(), si.get_file_attributes())
else:
return (0x0L, 0x0L, 0x0L, 0x0L, 0x0L)
mft_ref = property((lambda self: self._CNTFSFile__mft_ref))
base_mft_ref = property((lambda self: self._CNTFSFile__mft_record.get_base_mft_record() & 0xFFFFFFFFFFFFL))
is_base_entry = property((lambda self: self.base_mft_ref == 0))
data_streams = property((lambda self: self._CNTFSFile__data_streams))
data_stream_list = property((lambda self: self._CNTFSFile__data_streams.values()))
data_stream_count = property((lambda self: len(self._CNTFSFile__data_streams)))
file_names = property((lambda self: self._CNTFSFile__file_names))
file_name_list = property((lambda self: self._CNTFSFile__file_names.values()))
file_name_count = property((lambda self: len(self._CNTFSFile__file_names)))
is_directory = property((lambda self: self._CNTFSFile__is_directory))
is_deleted = property((lambda self: self._CNTFSFile__is_deleted))
volume = property((lambda self: self._CNTFSFile__volume))
ntfs_atributes = property((lambda self: self._CNTFSFile__ntfs_attributes))
ntfs_attributes_by_type = property((lambda self: self._CNTFSFile__ntfs_attributes_by_type))
mft_records = property((lambda self: self._CNTFSFile__mft_records[:]))
mft_refs = property((lambda self: self._CNTFSFile__mft_refs))
win32_file_names = property((lambda self: self._CNTFSFile__win32_file_names))
win32_file_name_list = property((lambda self: self._CNTFSFile__win32_file_names.values()))
win32_file_name_count = property((lambda self: len(self._CNTFSFile__win32_file_names)))
mft_record = property((lambda self: self._CNTFSFile__mft_record))
std_infos = property((lambda self: self._CNTFSFile__std_infos))
default_data_stream_size = property((lambda self: self._CNTFSFile__default_data_stream_size))
class CNTFSMFT(CNTFSFile):
'''
class for $MFT file
'''
def __init__(self, volume):
CNTFSFile.__init__(self, volume, 0)
def open(self):
CNTFSFile.open(self)
self._CNTFSMFT__main_stream = self.data_streams['']
def load_mft_record(self, mft_ref):
if mft_ref < 0:
raise CNTFSMFTReferenceOverflowError('Invalid MFT reference access')
v = self.volume
mr = v.mft_record_size
if mft_ref < 16:
raw_data = v.linear_space.read_data(v.mft_lcn * v.cluster_size + mr * mft_ref, mr)
return parse_mft_record(raw_data, v.disk.sector_size)
elif mft_ref >= self.mft_record_count:
raise CNTFSMFTReferenceOverflowError('Invalid MFT reference access')
raw_data = self.data_streams[''].read_data(mr * mft_ref, mr)
try:
return parse_mft_record(raw_data, v.disk.sector_size)
except:
raise CNTFSError('Invalid MFT entry at %s' % hex(mft_ref))
mft_record_size = property((lambda self: self.volume.mft_record_size))
mft_record_count = property((lambda self: self.data_streams[''].size / self.mft_record_size))
class CNTFSVolume:
def __init__(self, disk, start_sector, sector_count, related_partition = None):
self._CNTFSVolume__start_sector = start_sector
self._CNTFSVolume__sector_count = sector_count
self._CNTFSVolume__related_partition = related_partition
self._CNTFSVolume__disk = disk
self._CNTFSVolume__lck = threading.Lock()
def open_file(self, mft_ref):
self._CNTFSVolume__lck.acquire()
try:
file = CNTFSFile(self, mft_ref)
file.open()
finally:
self._CNTFSVolume__lck.release()
return file
def open_file_if_base(self, mft_ref):
self._CNTFSVolume__lck.acquire()
try:
file = CNTFSFile(self, mft_ref)
file.open_if_base()
finally:
self._CNTFSVolume__lck.release()
return file
def initialize(self):
'''
initialize(self)
        Initializes the CNTFSVolume object. Returns None on success
        or raises an exception on error.
'''
disk = self.disk
self._CNTFSVolume__boot_sector = boot_sector = ntfs_bootsector_from(disk.read_sectors(self.start_sector, 1))
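        # 43605 == 0xAA55, the boot sector signature; the OEM ID checked below
        # is 'NTFS    ' read as a little-endian 64-bit integer.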
if boot_sector.end_of_sector_marker != 43605:
raise CNTFSBootSectorError('Invalid boot sector marker')
if boot_sector.oem_id != 0x202020205346544EL:
raise CNTFSBootSectorError('Invalid OEM ID of NTFS boot sector')
if boot_sector.clusters_per_mft_record == 0:
raise CNTFSBootSectorError('Invalid MFT record size')
if boot_sector.clusters_per_index_record == 0:
raise CNTFSBootSectorError('Invalid INDX record size')
cluster_size = boot_sector.bpb.bytes_per_sector * boot_sector.bpb.sectors_per_cluster
mft_record_size = 0
index_record_size = 0
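        # NTFS stores these as signed bytes: a positive value is a cluster
        # count, a negative value n means a record size of 2**(-n) bytes.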
if boot_sector.clusters_per_mft_record > 0:
mft_record_size = boot_sector.clusters_per_mft_record * cluster_size
else:
mft_record_size = 1 << -(boot_sector.clusters_per_mft_record)
if boot_sector.clusters_per_index_record > 0:
index_record_size = boot_sector.clusters_per_index_record * cluster_size
else:
index_record_size = 1 << -(boot_sector.clusters_per_index_record)
self._CNTFSVolume__cluster_space = cluster_space = CreateCDiskClusterizer(disk, boot_sector.bpb.sectors_per_cluster, self.sector_count, self.start_sector)
self._CNTFSVolume__linear_space = linear_space = CreateCClusterLinearizer(disk, self.sector_count * disk.sector_size, self.start_sector)
self._CNTFSVolume__mft_lcn = mft_lcn = boot_sector.mft_lcn
self._CNTFSVolume__mftmirr_lcn = mft_mirr_lcn = boot_sector.mftmirr_lcn
self._CNTFSVolume__cluster_size = cluster_size
self._CNTFSVolume__mft_record_size = mft_record_size
self._CNTFSVolume__index_record_size = index_record_size
self._CNTFSVolume__volume_serial_number = boot_sector.volume_serial_number
self._CNTFSVolume__number_of_sectors = boot_sector.number_of_sectors
self._CNTFSVolume__MFT = CNTFSMFT(self)
self._CNTFSVolume__MFT.open()
def mft_ref_for_path(self, full_file_name_and_path):
'''
mft_ref_for_path(self, full_file_name_and_path)
        returns the mft reference for a given existing path, or -1 if the
path cannot be resolved.
'''
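        # Hedged usage sketch (the path is illustrative only):
        #   ref = volume.mft_ref_for_path(u'\\WINDOWS\\system32\\ntoskrnl.exe')
        #   if ref != -1:
        #       f = volume.open_file(ref)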
current_folder = FILE_root
items = filter((lambda x: x != u''), unicode(full_file_name_and_path).split('\\'))
idx = 0
while True:
folder = CNTFSFile(self, current_folder)
folder.open()
indexes = folder.read_indexes()
if idx == len(items):
if folder.has_file_name(items[idx - 1]):
return current_folder
else:
return -1
if not (folder.is_directory) or not indexes.has_key(u'$I30'):
return -1
index = indexes[u'$I30']
Found = False
for entry, related_object in index.entries:
if isinstance(related_object, CNTFSFileNameObject):
if related_object.get_file_name() == items[idx]:
current_folder = entry.get_indexed_file() & 0xFFFFFFFFFFFFL
Found = True
idx += 1
break
if not Found:
return -1
continue
def path_for_mft_ref(self, mft_ref):
'''
path_for_mft_ref(self, mft_ref)
returns path for mft reference if possible,
or None, if path cannot be built.
'''
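        # Hedged usage sketch: rebuilds u'\\dir\\...\\name' by walking parent
        # references from the AT_FILE_NAME attributes, e.g.
        #   print volume.path_for_mft_ref(ref)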
R = ''
f = CNTFSFile(self, mft_ref)
f.open()
while mft_ref != FILE_root:
if len(f.win32_file_names):
file_name = f.win32_file_names.keys()[0]
elif len(f.file_names):
file_name = f.file_names.keys()[0]
else:
return None
mft_ref = f.file_names[file_name].get_parent_directory() & 0xFFFFFFFFFFFFL
f = CNTFSFile(self, mft_ref)
f.open()
if not f.is_directory:
return None
            R = file_name + '\\' + R
return '\\' + R
def read_attr_defs(self):
'''
read_attr_defs(self)
        returns a list of attr_def entries from the $AttrDef file,
        or raises an exception on error.
'''
file = CNTFSFile(self, FILE_AttrDef)
file.open()
if not file.data_streams.has_key(''):
raise CNTFSError('$AttrDef file has no default data stream')
ds = file.data_streams['']
sz = ds.size
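        # Each ATTR_DEF record in $AttrDef is 0xA0 (160) bytes, hence the
        # fixed-size unpack below.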
if sz % 160 == 0:
count = sz / 160
struc = '=' + '160s' * count
data = struct.unpack(struc, ds.read_data(0, sz))
try:
return map((lambda x: new_ntfs_attrdef_object(x)), data)
            except:
                raise CNTFSError('Invalid ATTR_DEF records in $AttrDef file')
else:
raise CNTFSError('Invalid $AttrDef data stream size')
def read_volume_information(self):
'''
read_volume_information(self)
        returns the volume_information object from the $Volume file,
        or raises an exception on error.
'''
file = CNTFSFile(self, FILE_Volume)
file.open()
if file.ntfs_attributes_by_type.has_key(AT_VOLUME_INFORMATION):
attr = file.ntfs_attributes_by_type[AT_VOLUME_INFORMATION][0]
try:
return new_ntfs_volumeinformation_object(attr.get_content())
            except:
                raise CNTFSError('Invalid AT_VOLUME_INFORMATION attribute value in file $Volume')
else:
raise CNTFSError('File $Volume has no AT_VOLUME_INFORMATION attribute')
cluster_space = property((lambda self: self._CNTFSVolume__cluster_space))
linear_space = property((lambda self: self._CNTFSVolume__linear_space))
mft_lcn = property((lambda self: self._CNTFSVolume__mft_lcn))
mftmirr_lcn = property((lambda self: self._CNTFSVolume__mftmirr_lcn))
cluster_size = property((lambda self: self._CNTFSVolume__cluster_size))
mft_record_size = property((lambda self: self._CNTFSVolume__mft_record_size))
index_record_size = property((lambda self: self._CNTFSVolume__index_record_size))
volume_serial_number = property((lambda self: self._CNTFSVolume__volume_serial_number))
number_of_sectors = property((lambda self: self._CNTFSVolume__number_of_sectors))
MFT = property((lambda self: self._CNTFSVolume__MFT))
disk = property((lambda self: self._CNTFSVolume__disk))
related_partition = property((lambda self: self._CNTFSVolume__related_partition))
start_sector = property((lambda self: self._CNTFSVolume__start_sector))
sector_count = property((lambda self: self._CNTFSVolume__sector_count))
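# A minimal usage sketch (not part of the original module): it assumes a volume
# opened the way test() below does (fstools CreateCOSDisk / CreateCCachedDisk)
# and simply walks every base MFT record looking for deleted entries.
def example_list_deleted(volume):
    deleted = []
    for i in xrange(volume.MFT.mft_record_count):
        try:
            f = volume.open_file_if_base(i)
            if f.is_deleted and f.file_name_count:
                deleted.append((i, f.file_names.keys()[0]))
        except CNTFSError:
            # non-base or unreadable MFT records are simply skipped
            continue
    return deleted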
def test():
'''
import boot
import partition as prt
import validate
import time
import random
def get_logical_partitions(prt_tree):
R = []
for partition in prt_tree:
if isinstance(partition, prt.ExtendedPartition):
R.extend(get_logical_partitions(partition.partitions))
else:
R.append(partition)
return R
def process(disk, partition):
volume = CNTFSVolume(disk, partition.start_sector, partition.sector_count)
volume.initialize()
R = []
T = time.time()
for i in xrange(volume.MFT.mft_record_count):
try:
file = CNTFSFile(volume, i)
file.open()
#if file.is_deleted:
# R.append(file)
#volume.MFT.load_mft_record(i)
#print file.file_names.keys(), file.data_streams.keys()
if i % 0x100 == 0:
print "passed: ", hex(i), disk.get_cache_hits(), disk.get_cache_miss(), "\r",
except Exception, Value:
print "Error at ", hex(i), Value, hex(volume.MFT.mft_record_count)
print "Time: ", time.time() - T, "Items: ", volume.MFT.mft_record_count
try:
logger = validate.StdioLogger()
disk = CreateCCachedDisk(CreateCOSDisk(u"\\\\.\\PhysicalDrive1"), 7, 64)
#disk = CreateCCachedDisk(CreateCOSDisk(u"F:\\vmw\\winxp\\Windows XP Professional-flat.vmdk"), 7, 64)
partitions = get_logical_partitions(prt.PartitionValidator(logger).validate(disk))
for partition in partitions:
if partition.partition_type == boot.PARTITION_IFS:
print "FOUND NTFS at: ", partition.start_sector, partition.sector_count
process(disk, partition)
raw_input()
except Exception, Value:
print "
***Error : ", Value, "****
"
traceback.print_exc()
raw_input()
'''
def on_prog(drive_name, volume, pos, percent):
        print 'Progress: ', percent, '\r',
def on_deleted_file(drive_name, mft_ref, file):
pass
disk = CreateCCachedDisk(CreateCOSDisk(u'\\\\.\\X:'), 7, 64)
volume = CNTFSVolume(disk, 0, disk.sector_count)
volume.initialize()
F = volume.open_file(2)
print F.file_names.keys()
if __name__ == '__main__':
test()